DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
+ DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
+ DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct exec_domain, arch.metaphysical_rr0));
+ DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct exec_domain, arch.metaphysical_saved_rr0));
+
BLANK();
//DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
}
#endif
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
- if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
+ if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
ed->vcpu_info->arch.metaphysical_mode = 1;
+ ed->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
+ ed->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
#define DOMAIN_RID_BITS_DEFAULT 18
if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
BUG();
// returns -1 if none available
-unsigned long allocate_metaphysical_rid(void)
+unsigned long allocate_metaphysical_rr0(void)
{
-	unsigned long rid = allocate_reserved_rid();
+	ia64_rr rrv;
+	unsigned long rid = allocate_reserved_rid();
+
+	// Propagate failure before packing: stuffing -1 into the narrow
+	// rid bitfield would destroy the sentinel and the caller's
+	// "== -1UL" check could never fire.
+	if (rid == -1UL)
+		return -1UL;
+	rrv.rrval = 0;	// zero the whole value first; the bitfield stores
+			// below would otherwise leave reserved bits garbage
+	rrv.rid = rid;
+	rrv.ps = PAGE_SHIFT;
+	rrv.ve = 0;	// VHPT walker off for the metaphysical region
+	return rrv.rrval;
}
int deallocate_metaphysical_rid(unsigned long rid)
if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
newrrv.ps = PAGE_SHIFT;
+ if (rreg == 0) ed->arch.metaphysical_saved_rr0 = newrrv.rrval;
set_rr(rr,newrrv.rrval);
}
return 1;
}
-// set rr0 to the passed rid (for metaphysical mode so don't use domain offset
+// Load rr0 with the current vcpu's pre-computed metaphysical rr0 value.
+// Metaphysical mode deliberately bypasses the per-domain rid offset.
+// Returns 1 on success, matching the return-1 convention of the
+// neighbouring region-register routines.
-int set_metaphysical_rr(unsigned long rr, unsigned long rid)
+int set_metaphysical_rr0(void)
{
+	struct exec_domain *ed = current;
-	ia64_rr rrv;
-	rrv.rrval = 0;
-	rrv.rid = rid;
-	rrv.ps = PAGE_SHIFT;
//	rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
-	rrv.ve = 0;
-	set_rr(rr,rrv.rrval);
+	set_rr(0,ed->arch.metaphysical_rr0);
+	return 1;	// was declared int but fell off the end: reading the
+			// result was undefined behavior
}
// validates/changes region registers 0-6 in the currently executing domain
{
ia64_rr rrv;
- rrv.rrval = 0;
- rrv.rid = ed->domain->metaphysical_rid;
+ rrv.rrval = ed->domain->arch.metaphysical_rr0;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
if (ed->vcpu_info->arch.metaphysical_mode) {
ia64_rr rrv;
- rrv.rrval = 0;
- rrv.rid = ed->domain->metaphysical_rid;
- rrv.ps = PAGE_SHIFT;
+ rrv.rrval = ed->domain->arch.metaphysical_rr0;
rrv.ve = 1;
rr0 = rrv.rrval;
set_rr_no_srlz(0x0000000000000000L, rr0);
{
/* only do something if mode changes */
if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
- if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
+ if (newmode) set_metaphysical_rr0();
else if (PSCB(vcpu,rrs[0]) != -1)
set_one_rr(0, PSCB(vcpu,rrs[0]));
PSCB(vcpu,metaphysical_mode) = newmode;
return IA64_NO_FAULT;
}
+// Emulate setting psr.i for this vcpu: mark interrupt delivery as
+// enabled in the shared vcpu state.
+// Returns IA64_NO_FAULT unconditionally (cannot fail).
+IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
+{
+	PSCB(vcpu,interrupt_delivery_enabled) = 1;
+	return IA64_NO_FAULT;
+}
+
IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
struct arch_domain {
struct mm_struct *active_mm;
struct mm_struct *mm;
- int metaphysical_rid;
+ int metaphysical_rr0;
int starting_rid; /* first RID assigned to domain */
int ending_rid; /* one beyond highest RID assigned to domain */
int rid_bits; /* number of virtual rid bits (default: 18) */
u64 entry;
#endif
};
-#define metaphysical_rid arch.metaphysical_rid
#define starting_rid arch.starting_rid
#define ending_rid arch.ending_rid
#define rid_bits arch.rid_bits
unsigned long xen_timer_interval;
#endif
void *regs; /* temporary until find a better way to do privops */
+ int metaphysical_rr0; // from arch_domain (so is pinned)
+ int metaphysical_saved_rr0; // from arch_domain (so is pinned)
struct mm_struct *active_mm;
struct thread_struct _thread; // this must be last
#ifdef CONFIG_VTI